/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Access to VGA videoram
 *
 * (c) 1998 Martin Mares <mj@ucw.cz>
 */
#ifndef _ASM_VGA_H
#define _ASM_VGA_H

#include <linux/string.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

/*
 * On the PC, we can just recalculate addresses and then
 * access the videoram directly without any black magic.
 */

#define VGA_MAP_MEM(x, s)	CKSEG1ADDR(0x10000000L + (unsigned long)(x))

#define vga_readb(x)	(*(x))
#define vga_writeb(x, y)	(*(y) = (x))

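/*
 * Illustrative sketch (not part of the upstream header): how a text
 * console driver such as vgacon typically obtains a CPU pointer to the
 * legacy colour text buffer through VGA_MAP_MEM().  The 0xb8000 offset
 * is the standard PC text-mode window and the helper name is made up
 * for this example; whether the 0x10000000 bus window above is correct
 * for a particular board is platform dependent.  The size argument is
 * accepted for API compatibility but ignored by the macro.
 */
static inline u16 *vga_example_map_text_buffer(void)
{
	/* CKSEG1ADDR() returns an uncached kernel virtual address. */
	return (u16 *)VGA_MAP_MEM(0xb8000, 0x8000);
}
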
#define VT_BUF_HAVE_RW
/*
 * These are only needed for supporting VGA or MDA text mode, which use little
 * endian byte ordering.
 * In other cases, we can optimize by using native byte ordering and
 * <linux/vt_buffer.h> has already done the right job for us.
 */

#undef scr_writew
#undef scr_readw

static inline void scr_writew(u16 val, volatile u16 *addr)
{
	*addr = cpu_to_le16(val);
}

static inline u16 scr_readw(volatile const u16 *addr)
{
	return le16_to_cpu(*addr);
}

static inline void scr_memsetw(u16 *s, u16 v, unsigned int count)
{
	memset16(s, cpu_to_le16(v), count / 2);
}

#define scr_memcpyw(d, s, c)	memcpy(d, s, c)
#define scr_memmovew(d, s, c)	memmove(d, s, c)
#define VT_BUF_HAVE_MEMCPYW
#define VT_BUF_HAVE_MEMMOVEW
#define VT_BUF_HAVE_MEMSETW

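/*
 * Illustrative sketch (not part of the upstream header): the VT layer
 * stores one screen cell per 16-bit word, with the character in the
 * low byte and the attribute in the high byte of the logical value.
 * Routed through scr_writew() above, a big-endian MIPS kernel still
 * lays the cell out in the little-endian order the VGA text buffer
 * expects.  The helper name is made up for this example.
 */
static inline void vga_example_put_cell(volatile u16 *cell, u8 ch, u8 attr)
{
	/* Compose the cell in native order; scr_writew() stores it LE. */
	scr_writew(((u16)attr << 8) | ch, cell);
}
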
#endif /* _ASM_VGA_H */