/*
 * arch/sh/kernel/io.c - Machine independent I/O functions.
 *
 * Copyright (C) 2000 - 2009 Stuart Menefy
 * Copyright (C) 2005 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/machvec.h>
#include <asm/io.h>

16/*
17 * Copy data from IO memory space to "real" memory space.
18 */
19void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
20{
21 /*
22 * Would it be worthwhile doing byte and long transfers first
23 * to try and get aligned?
24 */
25#ifdef CONFIG_CPU_SH4
26 if ((count >= 0x20) &&
27 (((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) {
28 int tmp2, tmp3, tmp4, tmp5, tmp6;
29
30 __asm__ __volatile__(
31 "1: \n\t"
32 "mov.l @%7+, r0 \n\t"
33 "mov.l @%7+, %2 \n\t"
34 "movca.l r0, @%0 \n\t"
35 "mov.l @%7+, %3 \n\t"
36 "mov.l @%7+, %4 \n\t"
37 "mov.l @%7+, %5 \n\t"
38 "mov.l @%7+, %6 \n\t"
39 "mov.l @%7+, r7 \n\t"
40 "mov.l @%7+, r0 \n\t"
41 "mov.l %2, @(0x04,%0) \n\t"
42 "mov #0x20, %2 \n\t"
43 "mov.l %3, @(0x08,%0) \n\t"
44 "sub %2, %1 \n\t"
45 "mov.l %4, @(0x0c,%0) \n\t"
46 "cmp/hi %1, %2 ! T if 32 > count \n\t"
47 "mov.l %5, @(0x10,%0) \n\t"
48 "mov.l %6, @(0x14,%0) \n\t"
49 "mov.l r7, @(0x18,%0) \n\t"
50 "mov.l r0, @(0x1c,%0) \n\t"
51 "bf.s 1b \n\t"
52 " add #0x20, %0 \n\t"
53 : "=&r" (to), "=&r" (count),
54 "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
55 "=&r" (tmp5), "=&r" (tmp6), "=&r" (from)
56 : "7"(from), "0" (to), "1" (count)
57 : "r0", "r7", "t", "memory");
58 }
59#endif
60
61 if ((((u32)to | (u32)from) & 0x3) == 0) {
62 for (; count > 3; count -= 4) {
63 *(u32 *)to = *(volatile u32 *)from;
64 to += 4;
65 from += 4;
66 }
67 }
68
69 for (; count > 0; count--) {
70 *(u8 *)to = *(volatile u8 *)from;
71 to++;
72 from++;
73 }
74
75 mb();
76}
77EXPORT_SYMBOL(memcpy_fromio);
78
79/*
80 * Copy data from "real" memory space to IO memory space.
81 */
82void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
83{
84 if ((((u32)to | (u32)from) & 0x3) == 0) {
85 for ( ; count > 3; count -= 4) {
86 *(volatile u32 *)to = *(u32 *)from;
87 to += 4;
88 from += 4;
89 }
90 }
91
92 for (; count > 0; count--) {
93 *(volatile u8 *)to = *(u8 *)from;
94 to++;
95 from++;
96 }
97
98 mb();
99}
100EXPORT_SYMBOL(memcpy_toio);
101
102/*
103 * "memset" on IO memory space.
104 * This needs to be optimized.
105 */
106void memset_io(volatile void __iomem *dst, int c, unsigned long count)
107{
108 while (count) {
109 count--;
110 writeb(c, dst);
111 dst++;
112 }
113}
114EXPORT_SYMBOL(memset_io);
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/io.c - Machine independent I/O functions.
 *
 * Copyright (C) 2000 - 2009 Stuart Menefy
 * Copyright (C) 2005 Paul Mundt
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/machvec.h>
#include <asm/io.h>

13/*
14 * Copy data from IO memory space to "real" memory space.
15 */
16void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
17{
18 /*
19 * Would it be worthwhile doing byte and long transfers first
20 * to try and get aligned?
21 */
22#ifdef CONFIG_CPU_SH4
23 if ((count >= 0x20) &&
24 (((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) {
25 int tmp2, tmp3, tmp4, tmp5, tmp6;
26
27 __asm__ __volatile__(
28 "1: \n\t"
29 "mov.l @%7+, r0 \n\t"
30 "mov.l @%7+, %2 \n\t"
31 "movca.l r0, @%0 \n\t"
32 "mov.l @%7+, %3 \n\t"
33 "mov.l @%7+, %4 \n\t"
34 "mov.l @%7+, %5 \n\t"
35 "mov.l @%7+, %6 \n\t"
36 "mov.l @%7+, r7 \n\t"
37 "mov.l @%7+, r0 \n\t"
38 "mov.l %2, @(0x04,%0) \n\t"
39 "mov #0x20, %2 \n\t"
40 "mov.l %3, @(0x08,%0) \n\t"
41 "sub %2, %1 \n\t"
42 "mov.l %4, @(0x0c,%0) \n\t"
43 "cmp/hi %1, %2 ! T if 32 > count \n\t"
44 "mov.l %5, @(0x10,%0) \n\t"
45 "mov.l %6, @(0x14,%0) \n\t"
46 "mov.l r7, @(0x18,%0) \n\t"
47 "mov.l r0, @(0x1c,%0) \n\t"
48 "bf.s 1b \n\t"
49 " add #0x20, %0 \n\t"
50 : "=&r" (to), "=&r" (count),
51 "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
52 "=&r" (tmp5), "=&r" (tmp6), "=&r" (from)
53 : "7"(from), "0" (to), "1" (count)
54 : "r0", "r7", "t", "memory");
55 }
56#endif
57
58 if ((((u32)to | (u32)from) & 0x3) == 0) {
59 for (; count > 3; count -= 4) {
60 *(u32 *)to = *(volatile u32 *)from;
61 to += 4;
62 from += 4;
63 }
64 }
65
66 for (; count > 0; count--) {
67 *(u8 *)to = *(volatile u8 *)from;
68 to++;
69 from++;
70 }
71
72 mb();
73}
74EXPORT_SYMBOL(memcpy_fromio);
75
76/*
77 * Copy data from "real" memory space to IO memory space.
78 */
79void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
80{
81 if ((((u32)to | (u32)from) & 0x3) == 0) {
82 for ( ; count > 3; count -= 4) {
83 *(volatile u32 *)to = *(u32 *)from;
84 to += 4;
85 from += 4;
86 }
87 }
88
89 for (; count > 0; count--) {
90 *(volatile u8 *)to = *(u8 *)from;
91 to++;
92 from++;
93 }
94
95 mb();
96}
97EXPORT_SYMBOL(memcpy_toio);
98
99/*
100 * "memset" on IO memory space.
101 * This needs to be optimized.
102 */
103void memset_io(volatile void __iomem *dst, int c, unsigned long count)
104{
105 while (count) {
106 count--;
107 writeb(c, dst);
108 dst++;
109 }
110}
111EXPORT_SYMBOL(memset_io);