/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/system.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	int rc = -EFAULT;

	/* Split dst into a four byte aligned address and a byte offset. */
	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	/* Copy at most up to the next four byte boundary. */
	count = min_t(unsigned long, 4 - offset, size);
	/* icm byte mask: select count bytes starting at byte offset. */
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
	asm volatile(
		"	bras	1,0f\n"		/* r1 = address of the icm below */
		"	icm	0,0,0(%3)\n"	/* template, executed via "ex" */
		"0:	l	0,0(%1)\n"	/* load aligned word from dst */
		"	lra	%1,0(%1)\n"	/* virtual -> real address */
		"1:	ex	%2,0(1)\n"	/* merge src bytes under mask */
		"2:	stura	0,%1\n"		/* store word to real address */
		"	la	%0,0\n"		/* success: rc = 0 */
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}

long probe_kernel_write(void *dst, const void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = probe_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
	return copied < 0 ? -EFAULT : 0;
}
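
/*
 * Usage sketch (hypothetical caller, not part of this file): replace a
 * write-protected kernel instruction word; "addr" is a made-up name,
 * 0x0700 is a nop (bcr 0,0).
 *
 *	u16 nop = 0x0700;
 *	if (probe_kernel_write(addr, &nop, sizeof(nop)))
 *		return -EFAULT;
 */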

/*
 * Copy count bytes from src to dest with DAT and interrupts disabled,
 * so that real addresses can be accessed. Returns 0 on success or -EFAULT.
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	unsigned long flags;
	int rc = -EFAULT;

	if (!count)
		return 0;
	/* Disable DAT and irqs; stnsm returns the previous system mask. */
	flags = __arch_local_irq_stnsm(0xf8UL);
	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"	/* copy until done */
		"1:	jo	0b\n"		/* cc 3: not finished yet */
		"	lhi	%0,0x0\n"	/* success: rc = 0 */
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	arch_local_irq_restore(flags);
	return rc;
}
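
/*
 * Usage sketch (hypothetical, not part of this file): read a word from a
 * known real address; "REAL_ADDR" is a made-up constant.
 *
 *	unsigned long word;
 *	if (memcpy_real(&word, (void *) REAL_ADDR, sizeof(word)))
 *		return -EFAULT;
 */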

/*
 * Copy memory to absolute zero
 */
void copy_to_absolute_zero(void *dest, void *src, size_t count)
{
	unsigned long cr0;

	BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
	preempt_disable();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	/*
	 * Prefixing swaps real 0 with this CPU's prefix area, so the
	 * absolute zero page is reached via real address dest + prefix.
	 */
	memcpy_real(dest + store_prefix(), src, count);
	__ctl_load(cr0, 0, 0);
	preempt_enable();
}
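
/*
 * Usage sketch (hypothetical, not part of this file): publish a new restart
 * PSW in the absolute zero lowcore so every CPU sees it; the field and the
 * initializer values are illustrative only.
 *
 *	psw_t psw = { .mask = ..., .addr = ... };
 *	copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
 */
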
// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/io.h>
#include <asm/asm-extable.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>
#include <asm/maccess.h>
#include <asm/ctlreg.h>

unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
static DEFINE_MUTEX(memcpy_real_mutex);

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	/* Split dst into an eight byte aligned address and a byte offset. */
	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	/* Copy at most up to the next eight byte boundary. */
	size = min(8UL - offset, size);
	count = size - 1;	/* mvc length code is "bytes - 1" */
	asm volatile(
		"	bras	1,0f\n"		/* r1 = address of the mvc below */
		"	mvc	0(1,%4),0(%5)\n" /* template, executed via "ex" */
		"0:	mvc	0(8,%3),0(%0)\n" /* tmp = current dst contents */
		"	ex	%1,0(1)\n"	/* copy size bytes from src to tmp */
		"	lg	%1,0(%3)\n"	/* pick up the modified word */
		"	lra	%0,0(%0)\n"	/* virtual -> real address */
		"	sturg	%1,%0\n"	/* store word to real address */
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * __s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *__s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	while (size) {
		copied = s390_kernel_write_odd(tmp, src, size);
		tmp += copied;
		src += copied;
		size -= copied;
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}
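
/*
 * Usage sketch (hypothetical caller, not part of this file): code patching
 * paths (e.g. ftrace) reach this via the s390_kernel_write() wrapper;
 * "addr" is a made-up name, 0x0700 is a nop (bcr 0,0).
 *
 *	u16 nop = 0x0700;
 *	s390_kernel_write(addr, &nop, sizeof(nop));
 */
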
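/*
 * memcpy_real_iter - copy memory from a real address into an iov_iter
 *
 * The source page is mapped read-only at the fixed one-page virtual window
 * __memcpy_real_area (serialized by memcpy_real_mutex) and copied from
 * there. Returns the number of bytes copied.
 */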
size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
	size_t len, copied, res = 0;
	unsigned long phys, offset;
	void *chunk;
	pte_t pte;

	BUILD_BUG_ON(MEMCPY_REAL_SIZE != PAGE_SIZE);
	while (count) {
		phys = src & MEMCPY_REAL_MASK;
		offset = src & ~MEMCPY_REAL_MASK;
		chunk = (void *)(__memcpy_real_area + offset);
		len = min(count, MEMCPY_REAL_SIZE - offset);
		pte = mk_pte_phys(phys, PAGE_KERNEL_RO);

		mutex_lock(&memcpy_real_mutex);
		if (pte_val(pte) != pte_val(*memcpy_real_ptep)) {
			/* Retarget the window; flush the stale translation first. */
			__ptep_ipte(__memcpy_real_area, memcpy_real_ptep, 0, 0, IPTE_GLOBAL);
			set_pte(memcpy_real_ptep, pte);
		}
		copied = copy_to_iter(chunk, len, iter);
		mutex_unlock(&memcpy_real_mutex);

		count -= copied;
		src += copied;
		res += copied;
		if (copied < len)
			break;
	}
	return res;
}

int memcpy_real(void *dest, unsigned long src, size_t count)
{
	struct iov_iter iter;
	struct kvec kvec;

	kvec.iov_base = dest;
	kvec.iov_len = count;
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
	if (memcpy_real_iter(&iter, src, count) < count)
		return -EFAULT;
	return 0;
}
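
/*
 * Usage sketch (hypothetical, not part of this file): dump support code
 * reading a word from a known real address; "src_real" is a made-up name.
 *
 *	unsigned long word;
 *	if (memcpy_real(&word, src_real, sizeof(word)))
 *		return -EFAULT;
 */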

/*
 * Find CPU that owns swapped prefix page
 */
static int get_swapped_owner(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return cpu;
	}
	return -1;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page large.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	struct lowcore *abs_lc;
	unsigned long size;
	int this_cpu, cpu;

	cpus_read_lock();
	this_cpu = get_cpu();
	if (addr >= sizeof(struct lowcore)) {
		/* Only swapped prefix pages need a bounce buffer. */
		cpu = get_swapped_owner(addr);
		if (cpu < 0)
			goto out;
	}
	bounce = (void *)__get_free_page(GFP_ATOMIC);
	if (!bounce)
		goto out;
	size = PAGE_SIZE - (addr & ~PAGE_MASK);
	if (addr < sizeof(struct lowcore)) {
		/* Absolute lowcore: copy via the abs_lowcore mapping. */
		abs_lc = get_abs_lowcore();
		ptr = (void *)abs_lc + addr;
		memcpy(bounce, ptr, size);
		put_abs_lowcore(abs_lc);
	} else if (cpu == this_cpu) {
		/* Own prefix page: prefixing makes it visible at real 0. */
		ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
		memcpy(bounce, ptr, size);
	} else {
		memcpy(bounce, ptr, size);
	}
out:
	put_cpu();
	cpus_read_unlock();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}
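
/*
 * Usage sketch (hypothetical, not part of this file): the /dev/mem read path
 * brackets its copy with the two helpers above:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */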