v4.10.11
/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include "kvm-s390.h"

/**
 * kvm_s390_real_to_abs - convert guest real address to guest absolute address
 * @vcpu: guest virtual cpu
 * @gra: guest real address
 *
 * Returns the guest absolute address that corresponds to the passed guest real
 * address @gra of a virtual guest cpu by applying its prefix.
 */
static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
						 unsigned long gra)
{
	unsigned long prefix  = kvm_s390_get_prefix(vcpu);

	if (gra < 2 * PAGE_SIZE)
		gra += prefix;
	else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
		gra -= prefix;
	return gra;
}
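
/*
 * Worked example (hypothetical prefix value): with the vcpu's prefix set to
 * 0x20000, guest real address 0x0 maps to absolute address 0x20000 and
 * guest real address 0x20000 maps back to absolute address 0x0; addresses
 * outside both 8k windows pass through unchanged. A minimal illustrative
 * sketch (not built):
 */
#if 0
static void real_to_abs_example(struct kvm_vcpu *vcpu)
{
	/* real page 0 is redirected into the absolute prefix area */
	unsigned long lc = kvm_s390_real_to_abs(vcpu, 0x0UL);
	/* an address outside both windows is returned as-is */
	unsigned long other = kvm_s390_real_to_abs(vcpu, 0x100000UL);
}
#endif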

/**
 * kvm_s390_logical_to_effective - convert guest logical to effective address
 * @vcpu: guest virtual cpu
 * @ga: guest logical address
 *
 * Convert a guest vcpu logical address to a guest vcpu effective address by
 * applying the rules of the vcpu's addressing mode defined by PSW bits 31
 * and 32 (extended/basic addressing mode).
 *
 * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing
 * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode)
 * of @ga will be zeroed and the remaining bits will be returned.
 */
static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
							  unsigned long ga)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (psw_bits(*psw).eaba == PSW_AMODE_64BIT)
		return ga;
	if (psw_bits(*psw).eaba == PSW_AMODE_31BIT)
		return ga & ((1UL << 31) - 1);
	return ga & ((1UL << 24) - 1);
}
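
/*
 * Worked example: in the 31-bit addressing mode only the low 31 bits are
 * kept, so a logical address of 0x80001234 yields the effective address
 * 0x00001234; in 64-bit mode the address is returned unchanged. Sketch
 * only (not built):
 */
#if 0
static void logical_to_effective_example(struct kvm_vcpu *vcpu)
{
	/* result depends on the addressing mode bits in the guest PSW */
	unsigned long ea = kvm_s390_logical_to_effective(vcpu, 0x80001234UL);
}
#endif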

/*
 * put_guest_lc, read_guest_lc and write_guest_lc are guest access functions
 * which shall only be used to access the lowcore of a vcpu.
 * These functions should be used for e.g. interrupt handlers where no
 * guest memory access protection facilities, like key or low address
 * protection, are applicable.
 * At a later point guest vcpu lowcore access should happen via pinned
 * prefix pages, so that these pages can be accessed directly via the
 * kernel mapping. All of these *_lc functions can be removed then.
 */

/**
 * put_guest_lc - write a simple variable to a guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @x: value to copy to guest
 * @gra: vcpu's destination guest real address
 *
 * Copies a simple value from kernel space to a guest vcpu's lowcore.
 * The size of the variable may be 1, 2, 4 or 8 bytes. The destination
 * must be located in the vcpu's lowcore. Otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
#define put_guest_lc(vcpu, x, gra)				\
({								\
	struct kvm_vcpu *__vcpu = (vcpu);			\
	__typeof__(*(gra)) __x = (x);				\
	unsigned long __gpa;					\
								\
	__gpa = (unsigned long)(gra);				\
	__gpa += kvm_s390_get_prefix(__vcpu);			\
	kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x));	\
})
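
/*
 * Illustrative use of put_guest_lc(): callers pass @gra as a pointer-typed
 * lowcore offset (e.g. one of the __LC_* constants) so that
 * __typeof__(*(gra)) picks up the right access size. The 0x8e offset below
 * is a made-up placeholder for a 2-byte lowcore field. Sketch only (not
 * built):
 */
#if 0
static int put_guest_lc_example(struct kvm_vcpu *vcpu, u16 code)
{
	/* writes two bytes at real address 0x8e of the vcpu's lowcore */
	return put_guest_lc(vcpu, code, (u16 *) 0x8eUL);
}
#endif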

/**
 * write_guest_lc - copy data from kernel space to guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @gra: vcpu's destination guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from kernel space to guest vcpu's lowcore. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
static inline __must_check
int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		   unsigned long len)
{
	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

	return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_lc - copy data from guest vcpu's lowcore to kernel space
 * @vcpu: virtual cpu
 * @gra: vcpu's source guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from guest vcpu's lowcore to kernel space. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
static inline __must_check
int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		  unsigned long len)
{
	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

	return kvm_read_guest(vcpu->kvm, gpa, data, len);
}
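
/*
 * A minimal sketch pairing read_guest_lc() and write_guest_lc(): both only
 * apply the prefix and must therefore be used for ranges that lie entirely
 * within the vcpu's lowcore. The offset below is an illustrative assumption
 * (not built):
 */
#if 0
static int lc_roundtrip_example(struct kvm_vcpu *vcpu)
{
	u8 buf[8];
	int rc;

	rc = read_guest_lc(vcpu, 0x100, buf, sizeof(buf));
	if (rc)
		return rc;	/* -EFAULT: broken mapping, don't inject */
	return write_guest_lc(vcpu, 0x100, buf, sizeof(buf));
}
#endif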

enum gacc_mode {
	GACC_FETCH,
	GACC_STORE,
	GACC_IFETCH,
};

int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
			    ar_t ar, unsigned long *gpa, enum gacc_mode mode);
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
		    unsigned long length, enum gacc_mode mode);

int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
		 unsigned long len, enum gacc_mode mode);

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode);

/**
 * write_guest - copy data from kernel space to guest space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @ga (guest address).
 * In order to copy data to guest space the PSW of the vcpu is inspected:
 * If DAT is off, data will be copied to guest real or absolute memory.
 * If DAT is on, data will be copied to the address space specified by
 * the address space bits of the PSW:
 * primary, secondary, home space or access register mode.
 * The addressing mode of the PSW is also inspected, so that address wrap
 * around is taken into account for 24-, 31- and 64-bit addressing mode,
 * if the data to be copied crosses page boundaries in guest address space.
 * In addition, low address and DAT protection are checked before copying
 * any data (key protection is currently not implemented).
 *
 * This function modifies the 'struct kvm_s390_pgm_info pgm' member of @vcpu.
 * In case of an access exception (e.g. protection exception) pgm will contain
 * all data necessary so that a subsequent call to 'kvm_s390_inject_prog_vcpu()'
 * will inject a correct exception into the guest.
 * If no access exception happened, the contents of pgm are undefined when
 * this function returns.
 *
 * Returns:  - zero on success
 *	     - a negative value if e.g. the guest mapping is broken or in
 *	       case of out-of-memory. In this case the contents of pgm are
 *	       undefined. Also parts of @data may have been copied to guest
 *	       space.
 *	     - a positive value if an access exception happened. In this case
 *	       the returned value is the program interruption code and the
 *	       contents of pgm may be used to inject an exception into the
 *	       guest. No data has been copied to guest space.
 *
 * Note: in case an access exception is recognized no data has been copied to
 *	 guest space (this also holds if the data to be copied would cross
 *	 one or more page boundaries in guest space).
 *	 Therefore this function may be used for nullifying and suppressing
 *	 instruction emulation.
 *	 It may also be used for terminating instructions, if it is undefined
 *	 whether data has been changed in guest space in case of an exception.
 */
static inline __must_check
int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
		unsigned long len)
{
	return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
}
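
/*
 * Typical caller pattern for write_guest()/read_guest() following the rules
 * above: a negative return code is a host-side problem, a positive return
 * code is a program interruption code that may be forwarded to the guest.
 * The helper kvm_s390_inject_prog_cond() from kvm-s390.h is assumed here to
 * do that forwarding. Illustrative sketch (not built):
 */
#if 0
static int write_guest_example(struct kvm_vcpu *vcpu, unsigned long ga,
			       ar_t ar, void *data, unsigned long len)
{
	int rc = write_guest(vcpu, ga, ar, data, len);

	if (rc < 0)
		return rc;	/* host error, e.g. -EFAULT */
	if (rc > 0)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	return 0;		/* all data copied */
}
#endif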

/**
 * read_guest - copy data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @ga (guest address) to @data (kernel space).
 *
 * The behaviour of read_guest is identical to write_guest, except that
 * data will be copied from guest space to kernel space.
 */
static inline __must_check
int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
	       unsigned long len)
{
	return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);
}

/**
 * read_guest_instr - copy instruction data from guest space to kernel space
 * @vcpu: virtual cpu
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from the current psw address (guest space) to @data (kernel
 * space).
 *
 * The behaviour of read_guest_instr is identical to read_guest, except that
 * instruction data will be read from primary space when in home-space or
 * address-space mode.
 */
static inline __must_check
int read_guest_instr(struct kvm_vcpu *vcpu, void *data, unsigned long len)
{
	return access_guest(vcpu, vcpu->arch.sie_block->gpsw.addr, 0, data, len,
			    GACC_IFETCH);
}
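
/*
 * Instruction fetch differs from a data fetch only in the address space
 * used; the length is whatever the emulation needs. A sketch fetching one
 * 2-byte opcode at the current PSW address (illustrative, not built):
 */
#if 0
static int fetch_opcode_example(struct kvm_vcpu *vcpu, u16 *opcode)
{
	return read_guest_instr(vcpu, opcode, sizeof(*opcode));
}
#endif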

/**
 * write_guest_abs - copy data from kernel space to guest space absolute
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
		    unsigned long len)
{
	return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_abs - copy data from guest space absolute to kernel space
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gpa (guest absolute address) to @data (kernel space).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest key protection is not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
		   unsigned long len)
{
	return kvm_read_guest(vcpu->kvm, gpa, data, len);
}
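
/*
 * write_guest_abs()/read_guest_abs() perform no translation or protection
 * checking, so callers are expected to validate the range first, e.g. with
 * kvm_is_error_gpa() (assumed here from <linux/kvm_host.h>). Illustrative
 * sketch (not built):
 */
#if 0
static int abs_copy_example(struct kvm_vcpu *vcpu, unsigned long gpa,
			    void *data, unsigned long len)
{
	if (kvm_is_error_gpa(vcpu->kvm, gpa))
		return -EFAULT;	/* no memslot backs this absolute address */
	return write_guest_abs(vcpu, gpa, data, len);
}
#endif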

/**
 * write_guest_real - copy data from kernel space to guest space real
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gra (guest real address).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		     unsigned long len)
{
	return access_guest_real(vcpu, gra, data, len, GACC_STORE);
}

/**
 * read_guest_real - copy data from guest space real to kernel space
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gra (guest real address) to @data (kernel space).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest key protection is not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		    unsigned long len)
{
	return access_guest_real(vcpu, gra, data, len, GACC_FETCH);
}
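
/*
 * The *_real variants only apply prefixing (via access_guest_real()) and do
 * no storage-key or low-address checking. A minimal sketch reading a value
 * from guest real storage; the address is a made-up example (not built):
 */
#if 0
static int real_copy_example(struct kvm_vcpu *vcpu)
{
	u64 parm;

	return read_guest_real(vcpu, 0x1000, &parm, sizeof(parm));
}
#endif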

void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);

int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
			  unsigned long saddr);

#endif /* __KVM_S390_GACCESS_H */
v3.1
/*
 * access.h -  access guest memory
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"

static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       unsigned long guestaddr)
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}

static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (unsigned long __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR(uptr))
		return PTR_ERR(uptr);

	return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}

static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}
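
/*
 * Illustrative use of the typed accessors above: each helper translates the
 * guest address through gmap_fault() and performs a single naturally
 * aligned user-space access. Addresses below are made-up examples (not
 * built):
 */
#if 0
static int typed_access_example(struct kvm_vcpu *vcpu)
{
	u32 val;
	int rc;

	rc = get_guest_u32(vcpu, 0x1000, &val);	/* must be 4-byte aligned */
	if (rc)
		return rc;
	return put_guest_u32(vcpu, 0x1004, val + 1);
}
#endif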

static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		if (rc < 0)
			return rc;
	}
	return 0;
}

static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestdest + n < guestdest)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment */
	uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestdest & ~PMD_MASK);

	r = copy_to_user(uptr, from, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	from += size;
	n -= size;
	guestdest += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		from += PMD_SIZE;
		n -= PMD_SIZE;
		guestdest += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}

static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 void *from, unsigned long n)
{
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
}

static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				void *from, unsigned long n)
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
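
/*
 * Why the slowpath exists: copy_to_guest() can only relocate the prefix for
 * the range as a whole, so any range that straddles a boundary of the two
 * 8k windows (0 .. 2 * PAGE_SIZE and prefix .. prefix + 2 * PAGE_SIZE) is
 * copied byte-wise instead. Worked example with a hypothetical prefix of
 * 0x20000: a 16-byte copy to 0x1ff8 crosses the 0x2000 boundary and takes
 * the slowpath, while a copy entirely below 0x2000 is relocated into the
 * prefix area and uses the fast segment-wise path.
 */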

static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);
		if (rc < 0)
			return rc;
	}
	return 0;
}

static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment */
	uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestsrc & ~PMD_MASK);

	r = copy_from_user(to, uptr, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	to += size;
	n -= size;
	guestsrc += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		to += PMD_SIZE;
		n -= PMD_SIZE;
		guestsrc += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}

static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
#endif