/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		/* Put the address of the mvc template below into r1. */
		"	bras	1,0f\n"
		/* Template executed via ex: copy bytes from src into tmp
		 * at offset; ex patches the real length (count) in. */
		"	mvc	0(1,%4),0(%5)\n"
		/* Read eight aligned bytes from the destination into tmp. */
		"0:	mvc	0(8,%3),0(%0)\n"
		/* Merge "size" source bytes into the buffer. */
		"	ex	%1,0(1)\n"
		/* Reload the modified eight bytes. */
		"	lg	%1,0(%3)\n"
		/* Translate the destination to a real address and store the
		 * result back with sturg, bypassing DAT and write protection. */
		"	lra	%0,0(%0)\n"
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 *
 * Note: this means that this function may not be called concurrently on
 *	 several cpus with overlapping words, since this may potentially
 *	 cause data corruption.
 */
void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
	long copied;

	while (size) {
		copied = s390_kernel_write_odd(dst, src, size);
		dst += copied;
		src += copied;
		size -= copied;
	}
}

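/*
 * Illustration only, not part of the original file: a minimal sketch of
 * how a caller might patch a word that lives in a write-protected
 * kernel mapping. example_patch_site and example_patch_word are
 * hypothetical names.
 */
static u32 example_patch_site __maybe_unused = 0xdeadbeef;

static void __maybe_unused example_patch_word(u32 new_val)
{
	/* Works despite write protection; callers must serialize writes
	 * that touch overlapping eight byte words (see note above). */
	s390_kernel_write(&example_patch_site, &new_val, sizeof(new_val));
}
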
static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		/* mvcle moves a cpu determined amount per iteration and
		 * sets cc 3 as long as the operation is incomplete. */
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		"	lhi	%0,0x0\n"	/* success: rc = 0 */
		"2:\n"
		/* On a fault, rc keeps its initial value of -EFAULT. */
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	/* Switch off DAT as well as I/O and external interrupts. */
	flags = __arch_local_irq_stnsm(0xf8UL);
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	rc = __memcpy_real(dest, src, count);
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}

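/*
 * Illustration only: a hypothetical helper that copies real storage
 * into a virtually mapped kernel buffer, e.g. memory that has no
 * mapping in the kernel address space. Name and use case are
 * assumptions, not taken from this file.
 */
static int __maybe_unused example_read_real(void *buf, unsigned long real_addr,
					    size_t len)
{
	/* The source is a real address: page tables are bypassed, but
	 * prefixing still applies to the first two pages. */
	return memcpy_real(buf, (void *) real_addr, len);
}
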
/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		/* With the prefix set to zero this cpu's real addresses
		 * equal absolute addresses for the duration of the copy.
		 * Machine checks are disabled meanwhile since they would
		 * be handled via the wrong lowcore. */
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}

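/*
 * Illustration only: a hypothetical wrapper that stores one value at an
 * absolute address, e.g. into the absolute zero page shared by all
 * cpus. The name example_assign_absolute is an assumption.
 */
static void __maybe_unused example_assign_absolute(unsigned long *dest,
						   unsigned long val)
{
	memcpy_absolute(dest, &val, sizeof(val));
}
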
/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}

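/*
 * Illustration only: a hypothetical read path, e.g. for a dump device,
 * that hands real storage to user space via the bounce page above.
 * Names are assumptions.
 */
static int __maybe_unused example_dump_read(void __user *ubuf,
					    unsigned long real_addr,
					    unsigned long len)
{
	return copy_to_user_real(ubuf, (void *) real_addr, len);
}
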
/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page large.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
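
/*
 * Illustration only: the typical calling pattern, modeled on the
 * /dev/mem read path: translate, copy from the possibly bounced
 * pointer, then free the bounce buffer. example_devmem_read is a
 * hypothetical name; at most one page is read per call.
 */
static long __maybe_unused example_devmem_read(void *dst, phys_addr_t addr,
					       size_t len)
{
	void *ptr;

	ptr = xlate_dev_mem_ptr(addr);
	if (!ptr)
		return -ENOMEM;
	len = min(len, (size_t) (PAGE_SIZE - (addr & ~PAGE_MASK)));
	memcpy(dst, ptr, len);
	unxlate_dev_mem_ptr(addr, ptr);
	return len;
}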