// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/io.h>
#include <asm/asm-extable.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>
#include <asm/maccess.h>
#include <asm/ctlreg.h>

unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
static DEFINE_MUTEX(memcpy_real_mutex);

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		/* Save the address of the mvc template in %r1, then skip it */
		"	bras	1,0f\n"
		/* mvc template: copy bytes from src into tmp[offset]; the */
		/* real length is patched in by the ex instruction below   */
		"	mvc	0(1,%4),0(%5)\n"
		/* Read the eight aligned destination bytes into tmp */
		"0:	mvc	0(8,%3),0(%0)\n"
		/* Execute the template mvc with length count (= size - 1) */
		"	ex	%1,0(1)\n"
		/* Load the modified eight bytes ... */
		"	lg	%1,0(%3)\n"
		/* ... translate the destination to a real address ... */
		"	lra	%0,0(%0)\n"
		/* ... and store them back, bypassing DAT and write protection */
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
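 *
 * Concurrent callers are serialized by s390_kernel_write_lock below;
 * interrupts are disabled on the executing CPU for the duration of the copy.
 *
 * Illustrative, hypothetical usage (not part of the original file), e.g.
 * patching two bytes that may sit on a write-protected kernel page:
 *
 *	u16 new_insn = 0x0700;	// bcr 0,%r0: two-byte nop
 *
 *	s390_kernel_write(code_addr, &new_insn, sizeof(new_insn));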
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	while (size) {
		copied = s390_kernel_write_odd(tmp, src, size);
		tmp += copied;
		src += copied;
		size -= copied;
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}

size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
	size_t len, copied, res = 0;
	unsigned long phys, offset;
	void *chunk;
	pte_t pte;

	BUILD_BUG_ON(MEMCPY_REAL_SIZE != PAGE_SIZE);
	while (count) {
		phys = src & MEMCPY_REAL_MASK;
		offset = src & ~MEMCPY_REAL_MASK;
		chunk = (void *)(__memcpy_real_area + offset);
		len = min(count, MEMCPY_REAL_SIZE - offset);
		pte = mk_pte_phys(phys, PAGE_KERNEL_RO);

		mutex_lock(&memcpy_real_mutex);
		if (pte_val(pte) != pte_val(*memcpy_real_ptep)) {
			__ptep_ipte(__memcpy_real_area, memcpy_real_ptep, 0, 0, IPTE_GLOBAL);
			set_pte(memcpy_real_ptep, pte);
		}
		copied = copy_to_iter(chunk, len, iter);
		mutex_unlock(&memcpy_real_mutex);

		count -= copied;
		src += copied;
		res += copied;
		if (copied < len)
			break;
	}
	return res;
}

int memcpy_real(void *dest, unsigned long src, size_t count)
{
	struct iov_iter iter;
	struct kvec kvec;

	kvec.iov_base = dest;
	kvec.iov_len = count;
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
	if (memcpy_real_iter(&iter, src, count) < count)
		return -EFAULT;
	return 0;
}

/*
 * Find CPU that owns swapped prefix page
 */
static int get_swapped_owner(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return cpu;
	}
	return -1;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is maximum one page large.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	struct lowcore *abs_lc;
	unsigned long size;
	int this_cpu, cpu;

	cpus_read_lock();
	this_cpu = get_cpu();
	if (addr >= sizeof(struct lowcore)) {
		cpu = get_swapped_owner(addr);
		if (cpu < 0)
			goto out;
	}
	bounce = (void *)__get_free_page(GFP_ATOMIC);
	if (!bounce)
		goto out;
	size = PAGE_SIZE - (addr & ~PAGE_MASK);
	if (addr < sizeof(struct lowcore)) {
		abs_lc = get_abs_lowcore();
		ptr = (void *)abs_lc + addr;
		memcpy(bounce, ptr, size);
		put_abs_lowcore(abs_lc);
	} else if (cpu == this_cpu) {
		ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
		memcpy(bounce, ptr, size);
	} else {
		memcpy(bounce, ptr, size);
	}
out:
	put_cpu();
	cpus_read_unlock();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}
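
/*
 * Illustrative, hypothetical usage of memcpy_real() (not part of the
 * original file): read the first 512 bytes of absolute memory into a
 * caller-supplied buffer. A short copy makes memcpy_real() return -EFAULT.
 *
 *	static int example_read_absolute_zero(char *buf)
 *	{
 *		return memcpy_real(buf, 0, 512);
 *	}
 */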