Loading...
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_STUB_H
7#define __SYSDEP_STUB_H
8
9#include <asm/ptrace.h>
10
11#define STUB_SYSCALL_RET EAX
12#define STUB_MMAP_NR __NR_mmap2
13#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
14
/*
 * Issue a no-argument system call via the legacy "int $0x80" gate.
 * Returns the raw kernel return value (negative errno on failure).
 */
static inline long stub_syscall0(long syscall)
{
	long ret;

	/*
	 * "memory" clobber: the kernel may read or write user memory on
	 * our behalf, so the compiler must not cache memory contents in
	 * registers across the trap.
	 */
	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall)
			: "memory");

	return ret;
}
23
/*
 * Issue a one-argument system call via "int $0x80" (i386 convention:
 * arg1 in %ebx).  Returns the raw kernel return value.
 */
static inline long stub_syscall1(long syscall, long arg1)
{
	long ret;

	/* "memory" clobber: the syscall may read/write user memory. */
	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1)
			: "memory");

	return ret;
}
32
/*
 * Issue a two-argument system call via "int $0x80" (args in %ebx, %ecx).
 * Returns the raw kernel return value.
 */
static inline long stub_syscall2(long syscall, long arg1, long arg2)
{
	long ret;

	/* "memory" clobber: the syscall may read/write user memory. */
	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			"c" (arg2)
			: "memory");

	return ret;
}
42
/*
 * Issue a three-argument system call via "int $0x80" (args in %ebx,
 * %ecx, %edx).  Returns the raw kernel return value.
 */
static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
{
	long ret;

	/* "memory" clobber: the syscall may read/write user memory. */
	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			"c" (arg2), "d" (arg3)
			: "memory");

	return ret;
}
52
/*
 * Issue a four-argument system call via "int $0x80" (args in %ebx,
 * %ecx, %edx, %esi).  Returns the raw kernel return value.
 */
static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
				 long arg4)
{
	long ret;

	/* "memory" clobber: the syscall may read/write user memory. */
	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			"c" (arg2), "d" (arg3), "S" (arg4)
			: "memory");

	return ret;
}
63
/*
 * Issue a five-argument system call via "int $0x80" (args in %ebx,
 * %ecx, %edx, %esi, %edi).  Returns the raw kernel return value.
 */
static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
				 long arg4, long arg5)
{
	long ret;

	/* "memory" clobber: the syscall may read/write user memory. */
	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			"c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
			: "memory");

	return ret;
}
74
/*
 * Raise a breakpoint trap (SIGTRAP) so the tracing host regains
 * control of the stub.
 */
static inline void trap_myself(void)
{
	__asm__ volatile ("int3");
}
79
/*
 * Remap the stub data area: mmap2() 'fd' at page offset 'offset' as a
 * MAP_SHARED|MAP_FIXED, read/write mapping of one page at STUB_DATA,
 * then store the syscall's return value into stub_data->err so the
 * host side can check the result.
 *
 * Register choreography: i386 mmap2 takes its sixth argument (the page
 * offset) in %ebp, which cannot be named in a constraint list here, so
 * 'offset' arrives in %eax (the "a" input) and is moved to %ebp; %eax
 * is then reloaded with the syscall number before the trap.  After the
 * trap, %ebx is repointed at stub_data->err (operand %7) to save the
 * return value.
 *
 * NOTE(review): %ebx and %ebp are modified without being declared as
 * clobbers — presumably tolerable only because this runs in the bare
 * stub context with no frame to preserve; confirm before reusing.
 */
static inline void remap_stack(int fd, unsigned long offset)
{
	__asm__ volatile ("movl %%eax,%%ebp ; movl %0,%%eax ; int $0x80 ;"
			"movl %7, %%ebx ; movl %%eax, (%%ebx)"
			: : "g" (STUB_MMAP_NR), "b" (STUB_DATA),
			"c" (UM_KERN_PAGE_SIZE),
			"d" (PROT_READ | PROT_WRITE),
			"S" (MAP_FIXED | MAP_SHARED), "D" (fd),
			"a" (offset),
			"i" (&((struct stub_data *) STUB_DATA)->err)
			: "memory");
}
92
93#endif
1/*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __SYSDEP_STUB_H
7#define __SYSDEP_STUB_H
8
9#include <stddef.h>
10#include <asm/ptrace.h>
11#include <generated/asm-offsets.h>
12
13#define STUB_MMAP_NR __NR_mmap2
14#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
15
16static __always_inline long stub_syscall0(long syscall)
17{
18 long ret;
19
20 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall)
21 : "memory");
22
23 return ret;
24}
25
26static __always_inline long stub_syscall1(long syscall, long arg1)
27{
28 long ret;
29
30 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1)
31 : "memory");
32
33 return ret;
34}
35
36static __always_inline long stub_syscall2(long syscall, long arg1, long arg2)
37{
38 long ret;
39
40 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
41 "c" (arg2)
42 : "memory");
43
44 return ret;
45}
46
47static __always_inline long stub_syscall3(long syscall, long arg1, long arg2,
48 long arg3)
49{
50 long ret;
51
52 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
53 "c" (arg2), "d" (arg3)
54 : "memory");
55
56 return ret;
57}
58
59static __always_inline long stub_syscall4(long syscall, long arg1, long arg2,
60 long arg3, long arg4)
61{
62 long ret;
63
64 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
65 "c" (arg2), "d" (arg3), "S" (arg4)
66 : "memory");
67
68 return ret;
69}
70
71static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
72 long arg3, long arg4, long arg5)
73{
74 long ret;
75
76 __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
77 "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
78 : "memory");
79
80 return ret;
81}
82
/*
 * Six-argument syscall via "int $0x80".
 *
 * The i386 convention wants arg1 in %ebx and arg6 in %ebp, but neither
 * register can be handed to the constraint list here (presumably %ebx
 * is reserved under PIC and %ebp is the frame pointer — confirm).  So
 * both values are parked in a small on-stack struct whose address is
 * passed in %ebx; the asm saves %ebp, loads %ebp and %ebx from the
 * struct, traps, and restores %ebp afterwards.
 */
static __always_inline long stub_syscall6(long syscall, long arg1, long arg2,
					  long arg3, long arg4, long arg5,
					  long arg6)
{
	/* Carrier for the two args that need the unavailable registers. */
	struct syscall_args {
		int ebx, ebp;
	} args = { arg1, arg6 };
	long ret;

	__asm__ volatile ("pushl %%ebp;"
			  "movl 0x4(%%ebx),%%ebp;"
			  "movl (%%ebx),%%ebx;"
			  "int $0x80;"
			  "popl %%ebp"
			  : "=a" (ret)
			  : "0" (syscall), "b" (&args),
			    "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
			  : "memory");

	return ret;
}
104
105static __always_inline void trap_myself(void)
106{
107 __asm("int3");
108}
109
/*
 * Return the address of the stub's data area.
 *
 * The call/pop pair materialises the current instruction pointer in
 * %0; rounding it down with ~(UM_KERN_PAGE_SIZE - 1) yields the start
 * of the page the stub code is executing from, and adding one page
 * steps past it — presumably the data pages immediately follow the
 * (single) stub code page in the stub mapping; confirm against the
 * stub layout.
 *
 * NOTE(review): "andl"/"addl" modify the condition codes without a
 * "cc" clobber — GCC on x86 is generally tolerant of this, but
 * confirm if built with another compiler.
 */
static __always_inline void *get_stub_data(void)
{
	unsigned long ret;

	asm volatile (
		"call _here_%=;"
		"_here_%=:"
		"popl %0;"
		"andl %1, %0 ;"
		"addl %2, %0 ;"
		: "=a" (ret)
		: "g" (~(UM_KERN_PAGE_SIZE - 1)),
		  "g" (UM_KERN_PAGE_SIZE));

	return (void *)ret;
}
126
/*
 * stub_start(fn): entry shim for the stub.
 *
 * Drops %esp by the total size of the stub mapping (one code page plus
 * STUB_DATA_PAGES data pages) — presumably to move the stack pointer
 * clear of the stub pages before running; confirm against the caller's
 * stack setup — then loads fn's address into %eax and calls it
 * indirectly.
 */
#define stub_start(fn) \
	asm volatile ( \
		"subl %0,%%esp ;" \
		"movl %1, %%eax ; " \
		"call *%%eax ;" \
		:: "i" ((1 + STUB_DATA_PAGES) * UM_KERN_PAGE_SIZE), \
		   "i" (&fn))
#endif