v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * I/O string operations
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *    Copyright (C) 2006 IBM Corporation
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * Rewritten in C by Stephen Rothwell.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/export.h>

#include <asm/io.h>
#include <asm/firmware.h>
#include <asm/bug.h>

/* See definition in io.h */
bool isa_io_special;

/*
 * Read @count bytes from a single MMIO/PIO location into @buf.  The
 * eieio() keeps the successive device reads in order; the trailing
 * twi/isync pair creates a data dependency on the last value read, so
 * the final load is known to have completed before the caller resumes.
 */
void _insb(const volatile u8 __iomem *port, void *buf, long count)
{
	u8 *tbuf = buf;
	u8 tmp;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		tmp = *port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insb);

/*
 * Write @count bytes from @buf to a single MMIO/PIO location.  The
 * leading sync orders the device stores after any earlier accesses;
 * the trailing sync orders them before whatever the caller does next.
 */
void _outsb(volatile u8 __iomem *port, const void *buf, long count)
{
	const u8 *tbuf = buf;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		*port = *tbuf++;
	} while (--count != 0);
	asm volatile("sync");
}
EXPORT_SYMBOL(_outsb);
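
Drivers normally reach these two routines through the generic port-I/O
string helpers declared via asm/io.h rather than by calling them
directly. The sketch below is illustrative only and not part of io.c:
it assumes insb()/outsb() are backed by _insb()/_outsb() on powerpc,
and DEMO_IO_PORT, DEMO_FIFO_LEN and the demo_* functions are made-up
placeholders.

/* Illustrative driver-side sketch, not part of io.c. */
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_IO_PORT	0x3f8	/* hypothetical ISA-style port */
#define DEMO_FIFO_LEN	16

static u8 demo_rx[DEMO_FIFO_LEN];

static void demo_drain_fifo(void)
{
	/* Read DEMO_FIFO_LEN bytes from one port into demo_rx. */
	insb(DEMO_IO_PORT, demo_rx, DEMO_FIFO_LEN);
}

static void demo_fill_fifo(const u8 *data, long len)
{
	/* Write len bytes from data back out through the same port. */
	outsb(DEMO_IO_PORT, data, len);
}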

/*
 * 16-bit variant of _insb().  The "_ns" suffix stands for "no swap":
 * the halfwords are transferred in native byte order, as required for
 * string (FIFO) transfers where the byte stream must be preserved.
 */
void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
{
	u16 *tbuf = buf;
	u16 tmp;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		tmp = *port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insw_ns);

/* 16-bit, no-swap counterpart of _outsb(). */
void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
{
	const u16 *tbuf = buf;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		*port = *tbuf++;
	} while (--count != 0);
	asm volatile("sync");
}
EXPORT_SYMBOL(_outsw_ns);

/* 32-bit, no-swap counterpart of _insb(). */
void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
{
	u32 *tbuf = buf;
	u32 tmp;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		tmp = *port;
		eieio();
		*tbuf++ = tmp;
	} while (--count != 0);
	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insl_ns);

/* 32-bit, no-swap counterpart of _outsb(). */
void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
{
	const u32 *tbuf = buf;

	if (unlikely(count <= 0))
		return;
	asm volatile("sync");
	do {
		*port = *tbuf++;
	} while (--count != 0);
	asm volatile("sync");
}
EXPORT_SYMBOL(_outsl_ns);
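
The 16- and 32-bit routines above are the "no swap" backends that the
generic insw()/insl()/outsw()/outsl() accessors are expected to use on
powerpc: string transfers move a byte stream, so no endian conversion
is applied. The sketch below is illustrative only and not part of
io.c; DEMO_DATA_PORT and demo_read_sector() are made-up placeholders.

/* Illustrative driver-side sketch, not part of io.c. */
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_DATA_PORT	0x1f0	/* hypothetical 16-bit data port */

static void demo_read_sector(u16 *buf)
{
	/* 256 halfwords == 512 bytes, byte order preserved as-is. */
	insw(DEMO_DATA_PORT, buf, 256);
}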

/* True when @v is aligned to @a bytes; @a must be a power of two. */
#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)

/*
 * Fill @n bytes of device memory at @addr with @c: byte stores until
 * the pointer is 32-bit aligned, word stores for the bulk, byte stores
 * for the tail.  The two syncs order the fill against surrounding
 * accesses.
 */
notrace void
_memset_io(volatile void __iomem *addr, int c, unsigned long n)
{
	void *p = (void __force *)addr;
	u32 lc = c;
	lc |= lc << 8;
	lc |= lc << 16;

	__asm__ __volatile__ ("sync" : : : "memory");
	while(n && !IO_CHECK_ALIGN(p, 4)) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	while(n >= 4) {
		*((volatile u32 *)p) = lc;
		p += 4;
		n -= 4;
	}
	while(n) {
		*((volatile u8 *)p) = c;
		p++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memset_io);
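
memset_io() is the generic wrapper that this file is expected to back
on powerpc. The sketch below is illustrative only and not part of
io.c: it clears an ioremap()ed device window; demo_clear_window(),
demo_bar_phys and DEMO_BAR_SIZE are made-up placeholders.

/* Illustrative driver-side sketch, not part of io.c. */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_BAR_SIZE	0x1000

static int demo_clear_window(phys_addr_t demo_bar_phys)
{
	void __iomem *regs;

	regs = ioremap(demo_bar_phys, DEMO_BAR_SIZE);
	if (!regs)
		return -ENOMEM;

	/* Byte stores up to alignment, then 32-bit stores, as above. */
	memset_io(regs, 0, DEMO_BAR_SIZE);

	iounmap(regs);
	return 0;
}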

/*
 * Copy @n bytes from device memory @src to normal memory @dest.  Byte
 * copies are used until both pointers are 32-bit aligned, then word
 * copies; eieio() keeps the device reads in order and the two syncs
 * order the whole copy against surrounding accesses.
 */
void _memcpy_fromio(void *dest, const volatile void __iomem *src,
		    unsigned long n)
{
	void *vsrc = (void __force *) src;

	__asm__ __volatile__ ("sync" : : : "memory");
	while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		eieio();
		vsrc++;
		dest++;
		n--;
	}
	while(n >= 4) {
		*((u32 *)dest) = *((volatile u32 *)vsrc);
		eieio();
		vsrc += 4;
		dest += 4;
		n -= 4;
	}
	while(n) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		eieio();
		vsrc++;
		dest++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_fromio);

/*
 * Copy @n bytes from normal memory @src to device memory @dest, using
 * the same byte/word alignment strategy as _memcpy_fromio().  The two
 * syncs order the MMIO stores against surrounding accesses.
 */
void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
{
	void *vdest = (void __force *) dest;

	__asm__ __volatile__ ("sync" : : : "memory");
	while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	while(n >= 4) {
		*((volatile u32 *)vdest) = *((volatile u32 *)src);
		src += 4;
		vdest += 4;
		n-=4;
	}
	while(n) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src++;
		vdest++;
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_toio);
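
memcpy_toio() and memcpy_fromio() are the generic wrappers expected to
sit on top of the two routines above. The sketch below is illustrative
only and not part of io.c: demo_roundtrip() and its parameters are
made-up placeholders for an already-mapped device buffer.

/* Illustrative driver-side sketch, not part of io.c. */
#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

static bool demo_roundtrip(void __iomem *dev_buf, const void *msg, size_t len)
{
	u8 readback[64];

	if (len > sizeof(readback))
		return false;

	memcpy_toio(dev_buf, msg, len);		/* CPU -> device memory */
	memcpy_fromio(readback, dev_buf, len);	/* device memory -> CPU */

	return memcmp(readback, msg, len) == 0;
}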