arch/s390/mm/maccess.c

v4.6
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
        unsigned long aligned, offset, count;
        char tmp[8];

        aligned = (unsigned long) dst & ~7UL;   /* start of the enclosing 8-byte word */
        offset = (unsigned long) dst & 7UL;     /* byte position of dst within it */
        size = min(8UL - offset, size);         /* never cross the word boundary */
        count = size - 1;                       /* mvc length code: n means n + 1 bytes */
        asm volatile(
                "       bras    1,0f\n"            /* %r1 = address of the mvc template */
                "       mvc     0(1,%4),0(%5)\n"   /* template, run by ex below: src -> tmp+offset */
                "0:     mvc     0(8,%3),0(%0)\n"   /* tmp = current 8 bytes at aligned dst */
                "       ex      %1,0(1)\n"         /* execute template with length = count */
                "       lg      %1,0(%3)\n"        /* load the modified 8 bytes */
                "       lra     %0,0(%0)\n"        /* real address of the target word */
                "       sturg   %1,%0\n"           /* store using real address, bypassing DAT */
                : "+&a" (aligned), "+&a" (count), "=m" (tmp)
                : "a" (&tmp), "a" (&tmp[offset]), "a" (src)
                : "cc", "memory", "1");
        return size;
}
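
The alignment arithmetic above clamps each write so it never crosses an eight byte boundary; the caller's loop in s390_kernel_write() below then advances chunk by chunk. A minimal standalone sketch of the same chunking math in plain C, compilable in userspace; chunk_len() is made up for illustration:

#include <stdio.h>

/* Sketch of the chunking math used by s390_kernel_write_odd(). */
static unsigned long chunk_len(unsigned long dst, unsigned long size)
{
        unsigned long offset = dst & 7UL;       /* byte offset within the 8-byte word */
        unsigned long room = 8UL - offset;      /* bytes left up to the boundary */

        return size < room ? size : room;       /* same as min(8UL - offset, size) */
}

int main(void)
{
        unsigned long dst = 0x1005, size = 16, n;

        while (size) {                          /* mirrors the loop in s390_kernel_write() */
                n = chunk_len(dst, size);
                printf("write %lu byte(s) at 0x%lx\n", n, dst);
                dst += n;
                size -= n;
        }
        return 0;                               /* chunks: 3, 8, 5 */
}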

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 *
 * Note: this means that this function may not be called concurrently on
 *       several cpus with overlapping words, since this may potentially
 *       cause data corruption.
 */
void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
        long copied;

        while (size) {
                copied = s390_kernel_write_odd(dst, src, size);
                dst += copied;
                src += copied;
                size -= copied;
        }
}
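
For context, a hedged sketch of how a caller might use this interface to patch otherwise write-protected kernel text. patch_branch() and the instruction word are made up for illustration; only the s390_kernel_write() signature comes from the listing:

/* Hypothetical caller: patch a read-only kernel location in place. */
static void patch_branch(void *ro_location)
{
        u32 insn = 0x47000000;  /* example 4-byte instruction pattern */

        /*
         * In v4.6 the caller must guarantee that no other CPU writes to
         * overlapping 8-byte words concurrently -- see the comment above.
         */
        s390_kernel_write(ro_location, &insn, sizeof(insn));
}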

static int __memcpy_real(void *dest, void *src, size_t count)
{
        register unsigned long _dest asm("2") = (unsigned long) dest;
        register unsigned long _len1 asm("3") = (unsigned long) count;
        register unsigned long _src  asm("4") = (unsigned long) src;
        register unsigned long _len2 asm("5") = (unsigned long) count;
        int rc = -EFAULT;

        asm volatile (
                "0:     mvcle   %1,%2,0x0\n"    /* copy, padding with 0x00 */
                "1:     jo      0b\n"           /* cc == 3: partial copy, continue */
                "       lhi     %0,0x0\n"       /* done: rc = 0 */
                "2:\n"
                EX_TABLE(1b,2b)                 /* on a fault: skip to 2, rc stays -EFAULT */
                : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
                  "+d" (_len2), "=m" (*((long *) dest))
                : "m" (*((long *) src))
                : "cc", "memory");
        return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
        int irqs_disabled, rc;
        unsigned long flags;

        if (!count)
                return 0;
        flags = __arch_local_irq_stnsm(0xf8UL);
        irqs_disabled = arch_irqs_disabled_flags(flags);
        if (!irqs_disabled)
                trace_hardirqs_off();
        rc = __memcpy_real(dest, src, count);
        if (!irqs_disabled)
                trace_hardirqs_on();
        __arch_local_irq_ssm(flags);
        return rc;
}
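
A hedged usage sketch for memcpy_real(): fetching memory by its real address with DAT switched off, as a dump or diagnostics path might do. read_real_range() is a hypothetical wrapper; the 0/-EFAULT return convention comes from __memcpy_real() above:

/* Hypothetical caller: fetch a range of real (unmapped) memory. */
static int read_real_range(void *buf, unsigned long real_addr, size_t len)
{
        int rc;

        rc = memcpy_real(buf, (void *) real_addr, len);
        if (rc)         /* -EFAULT: real address not accessible */
                return rc;
        return 0;
}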

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
        unsigned long cr0, flags, prefix;

        flags = arch_local_irq_save();
        __ctl_store(cr0, 0, 0);
        __ctl_clear_bit(0, 28); /* disable lowcore protection */
        prefix = store_prefix();
        if (prefix) {
                local_mcck_disable();
                set_prefix(0);
                memcpy(dest, src, count);
                set_prefix(prefix);
                local_mcck_enable();
        } else {
                memcpy(dest, src, count);
        }
        __ctl_load(cr0, 0, 0);
        arch_local_irq_restore(flags);
}
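
A hedged sketch of the typical use: writing a value through to the absolute zero page even though the prefix register remaps that range for the current CPU; this mirrors how the kernel's mem_assign_absolute() helper wraps memcpy_absolute(). The restart_data field is only an example target:

/* Hypothetical caller: publish a value into the absolute zero page. */
static void set_absolute_restart_data(unsigned long value)
{
        /*
         * &S390_lowcore.restart_data is a low address; with the prefix
         * temporarily set to 0 inside memcpy_absolute() it reaches the
         * absolute (unprefixed) zero page rather than this CPU's lowcore.
         */
        memcpy_absolute(&S390_lowcore.restart_data, &value, sizeof(value));
}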

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
        int offs = 0, size, rc;
        char *buf;

        buf = (char *) __get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        rc = -EFAULT;
        while (offs < count) {
                size = min(PAGE_SIZE, count - offs);
                if (memcpy_real(buf, src + offs, size))
                        goto out;
                if (copy_to_user(dest + offs, buf, size))
                        goto out;
                offs += size;
        }
        rc = 0;
out:
        free_page((unsigned long) buf);
        return rc;
}
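
A hedged sketch of the intended caller pattern: a dump interface handing real memory to a userspace reader page by page through the bounce buffer above. dump_read() is hypothetical:

/* Hypothetical caller: service a userspace read of real memory. */
static ssize_t dump_read(void __user *ubuf, unsigned long real_addr, size_t len)
{
        int rc;

        rc = copy_to_user_real(ubuf, (void *) real_addr, len);
        if (rc)         /* -ENOMEM or -EFAULT from the helper above */
                return rc;
        return len;
}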

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
        unsigned long lc;
        int cpu;

        if (addr < sizeof(struct lowcore))
                return 1;
        for_each_online_cpu(cpu) {
                lc = (unsigned long) lowcore_ptr[cpu];
                if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
                        continue;
                return 1;
        }
        return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is maximum one page large.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
        void *bounce = (void *) addr;
        unsigned long size;

        get_online_cpus();
        preempt_disable();
        if (is_swapped(addr)) {
                size = PAGE_SIZE - (addr & ~PAGE_MASK);
                bounce = (void *) __get_free_page(GFP_ATOMIC);
                if (bounce)
                        memcpy_absolute(bounce, (void *) addr, size);
        }
        preempt_enable();
        put_online_cpus();
        return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
        if ((void *) addr != buf)
                free_page((unsigned long) buf);
}
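
The two /dev/mem helpers above are meant to bracket each access, roughly as the generic /dev/mem read path does. A hedged sketch of that pattern, with error handling reduced to the essentials:

/* Hypothetical caller: the xlate/unxlate bracket around a /dev/mem read. */
static int read_dev_mem(void __user *ubuf, phys_addr_t p, size_t sz)
{
        void *ptr = xlate_dev_mem_ptr(p);       /* bounce buffer if p hits a prefix page */
        int rc = 0;

        if (!ptr)                               /* bounce allocation failed */
                return -EFAULT;
        if (copy_to_user(ubuf, ptr, sz))
                rc = -EFAULT;
        unxlate_dev_mem_ptr(p, ptr);            /* frees the bounce buffer, if any */
        return rc;
}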
v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/stacktrace.h>
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
        unsigned long aligned, offset, count;
        char tmp[8];

        aligned = (unsigned long) dst & ~7UL;
        offset = (unsigned long) dst & 7UL;
        size = min(8UL - offset, size);
        count = size - 1;
        asm volatile(
                "       bras    1,0f\n"
                "       mvc     0(1,%4),0(%5)\n"
                "0:     mvc     0(8,%3),0(%0)\n"
                "       ex      %1,0(1)\n"
                "       lg      %1,0(%3)\n"
                "       lra     %0,0(%0)\n"
                "       sturg   %1,%0\n"
                : "+&a" (aligned), "+&a" (count), "=m" (tmp)
                : "a" (&tmp), "a" (&tmp[offset]), "a" (src)
                : "cc", "memory", "1");
        return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
        void *tmp = dst;
        unsigned long flags;
        long copied;

        spin_lock_irqsave(&s390_kernel_write_lock, flags);
        if (!(flags & PSW_MASK_DAT)) {
                memcpy(dst, src, size);
        } else {
                while (size) {
                        copied = s390_kernel_write_odd(tmp, src, size);
                        tmp += copied;
                        src += copied;
                        size -= copied;
                }
        }
        spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

        return dst;
}
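
Two contract changes versus v4.6 are visible here: writers are now serialized by s390_kernel_write_lock, which is why the old "may not be called concurrently" note is gone from the comment, and the function returns its destination like memcpy(). There is also an early-boot path: if the saved PSW mask shows DAT was already off, a plain memcpy() is sufficient. A hedged caller sketch; patch_insn() is hypothetical:

/* Hypothetical caller relying on the v5.9 contract. */
static void *patch_insn(void *ro_location, const void *insn, size_t len)
{
        /* No external locking needed; the spinlock above serializes writers. */
        return s390_kernel_write(ro_location, insn, len);
}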

static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
        register unsigned long _dest asm("2") = (unsigned long) dest;
        register unsigned long _len1 asm("3") = (unsigned long) count;
        register unsigned long _src  asm("4") = (unsigned long) src;
        register unsigned long _len2 asm("5") = (unsigned long) count;
        int rc = -EFAULT;

        asm volatile (
                "0:     mvcle   %1,%2,0x0\n"
                "1:     jo      0b\n"
                "       lhi     %0,0x0\n"
                "2:\n"
                EX_TABLE(1b,2b)
                : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
                  "+d" (_len2), "=m" (*((long *) dest))
                : "m" (*((long *) src))
                : "cc", "memory");
        return rc;
}

static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
                                                        unsigned long src,
                                                        unsigned long count)
{
        int irqs_disabled, rc;
        unsigned long flags;

        if (!count)
                return 0;
        flags = arch_local_irq_save();
        irqs_disabled = arch_irqs_disabled_flags(flags);
        if (!irqs_disabled)
                trace_hardirqs_off();
        __arch_local_irq_stnsm(0xf8); // disable DAT
        rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
        if (flags & PSW_MASK_DAT)
                __arch_local_irq_stosm(0x04); // enable DAT
        if (!irqs_disabled)
                trace_hardirqs_on();
        __arch_local_irq_ssm(flags);
        return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
        int rc;

        if (S390_lowcore.nodat_stack != 0) {
                preempt_disable();
                rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3,
                                   dest, src, count);
                preempt_enable();
                return rc;
        }
        /*
         * This is a really early memcpy_real call; the stacks are
         * not set up yet. Just call _memcpy_real on the early boot
         * stack.
         */
        return _memcpy_real((unsigned long) dest, (unsigned long) src,
                            (unsigned long) count);
}
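
The stack switch is the key detail: _memcpy_real() runs with DAT disabled, so its stack must be reachable by real address. With virtually mapped kernel stacks (CONFIG_VMAP_STACK) the normal stack is not, hence CALL_ON_STACK moves the copy onto the identity-mapped nodat stack. A hedged sketch of an early-boot caller that takes the fallback path before that stack exists; the function and message are illustrative:

/*
 * Hypothetical early-boot caller: memcpy_real() falls back to a direct
 * _memcpy_real() call while S390_lowcore.nodat_stack is still zero.
 */
static void __init early_copy_real(void *dst, unsigned long real_src, size_t len)
{
        if (memcpy_real(dst, (void *) real_src, len))
                panic("cannot access real memory at %lx", real_src);
}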

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
        unsigned long cr0, flags, prefix;

        flags = arch_local_irq_save();
        __ctl_store(cr0, 0, 0);
        __ctl_clear_bit(0, 28); /* disable lowcore protection */
        prefix = store_prefix();
        if (prefix) {
                local_mcck_disable();
                set_prefix(0);
                memcpy(dest, src, count);
                set_prefix(prefix);
                local_mcck_enable();
        } else {
                memcpy(dest, src, count);
        }
        __ctl_load(cr0, 0, 0);
        arch_local_irq_restore(flags);
}

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
        int offs = 0, size, rc;
        char *buf;

        buf = (char *) __get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        rc = -EFAULT;
        while (offs < count) {
                size = min(PAGE_SIZE, count - offs);
                if (memcpy_real(buf, src + offs, size))
                        goto out;
                if (copy_to_user(dest + offs, buf, size))
                        goto out;
                offs += size;
        }
        rc = 0;
out:
        free_page((unsigned long) buf);
        return rc;
}

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
        unsigned long lc;
        int cpu;

        if (addr < sizeof(struct lowcore))
                return 1;
        for_each_online_cpu(cpu) {
                lc = (unsigned long) lowcore_ptr[cpu];
                if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
                        continue;
                return 1;
        }
        return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is maximum one page large.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
        void *bounce = (void *) addr;
        unsigned long size;

        get_online_cpus();
        preempt_disable();
        if (is_swapped(addr)) {
                size = PAGE_SIZE - (addr & ~PAGE_MASK);
                bounce = (void *) __get_free_page(GFP_ATOMIC);
                if (bounce)
                        memcpy_absolute(bounce, (void *) addr, size);
        }
        preempt_enable();
        put_online_cpus();
        return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
        if ((void *) addr != buf)
                free_page((unsigned long) buf);
}