// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/stacktrace.h>

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		/* Save the address of the mvc template in r1, branch over it. */
		"	bras	1,0f\n"
		/* Template: copy count + 1 bytes from src to tmp[offset]. */
		"	mvc	0(1,%4),0(%5)\n"
		/* Read the eight aligned destination bytes into tmp. */
		"0:	mvc	0(8,%3),0(%0)\n"
		/* Execute the template above with the real length. */
		"	ex	%1,0(1)\n"
		/* Load the modified eight bytes from tmp ... */
		"	lg	%1,0(%3)\n"
		/* ... get the real address of the destination ... */
		"	lra	%0,0(%0)\n"
		/* ... and store them back, bypassing DAT and write protection. */
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from the destination at an eight byte boundary, modifies the
 * requested bytes and writes the result back in a loop.
 */
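/*
 * Illustration: a three byte write to dst = 0x1005 is handled with
 * aligned = 0x1000, offset = 5, size = min(8 - 5, 3) = 3 and count = 2:
 * the eight bytes at 0x1000 are fetched, bytes 5..7 are replaced with
 * the source bytes and the result is stored back with sturg.
 */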
/* Serialize the read-modify-write sequences against each other. */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	while (size) {
		copied = s390_kernel_write_odd(dst, src, size);
		dst += copied;
		src += copied;
		size -= copied;
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
}

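/*
 * Typical users of s390_kernel_write() are e.g. the ftrace and jump
 * label code, which have to patch otherwise write protected kernel
 * text.
 */
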
static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		/* mvcle copies in chunks; cc == 3 means "not done yet". */
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		/* All bytes copied: report success. */
		"	lhi	%0,0x0\n"
		"2:\n"
		/* On an access exception continue at 2: with rc == -EFAULT. */
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
				  unsigned long count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	/* Clear the DAT, I/O and external interrupt bits in the PSW. */
	flags = __arch_local_irq_stnsm(0xf8UL);
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel). The copy runs with DAT
 * switched off, so it must not run on a virtually mapped stack; switch
 * to the nodat stack instead.
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	if (S390_lowcore.nodat_stack != 0)
		return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
				     3, dest, src, count);
	/*
	 * This is a really early memcpy_real call, the stacks are
	 * not set up yet. Just call _memcpy_real on the early boot
	 * stack.
	 */
	return _memcpy_real((unsigned long) dest, (unsigned long) src,
			    (unsigned long) count);
}

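/*
 * memcpy_real() is used e.g. by the crash dump code to read memory that
 * is not mapped in the kernel address space, such as the memory of the
 * previous kernel in a kdump environment.
 */
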
/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		/* With the prefix set to zero, real equals absolute. */
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}

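/*
 * Usage sketch (restart_psw is the restart interruption new PSW in
 * struct lowcore; the concrete PSW value and fn are just examples):
 * install a restart PSW in the absolute zero page:
 *
 *	psw_t psw = { .mask = PSW_KERNEL_BITS, .addr = (unsigned long) fn };
 *
 *	memcpy_absolute((void *) offsetof(struct lowcore, restart_psw),
 *			&psw, sizeof(psw));
 */
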
/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}

/*
 * Check if physical address is within prefix or zero page. Prefixing
 * swaps these ranges, so they must be accessed in absolute mode to see
 * their actual contents.
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is at most one page.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
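
/*
 * Usage sketch, as in the /dev/mem read path (drivers/char/mem.c):
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		rc = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */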