/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * AARCH64 specific definitions for NOLIBC
 * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
 */

#ifndef _NOLIBC_ARCH_AARCH64_H
#define _NOLIBC_ARCH_AARCH64_H

#include "compiler.h"
#include "crt.h"
12
/* Syscalls for AARCH64 :
 * - registers are 64-bit
 * - stack is 16-byte aligned
 * - syscall number is passed in x8
 * - arguments are in x0, x1, x2, x3, x4, x5
 * - the system call is performed by calling svc 0
 * - syscall return comes in x0.
 * - the arguments are cast to long and assigned into the target registers
 *   which are then simply passed as registers to the asm code, so that we
 *   don't have to experience issues with register constraints.
 */
24
25#define my_syscall0(num) \
26({ \
27 register long _num __asm__ ("x8") = (num); \
28 register long _arg1 __asm__ ("x0"); \
29 \
30 __asm__ volatile ( \
31 "svc #0\n" \
32 : "=r"(_arg1) \
33 : "r"(_num) \
34 : "memory", "cc" \
35 ); \
36 _arg1; \
37})
38
39#define my_syscall1(num, arg1) \
40({ \
41 register long _num __asm__ ("x8") = (num); \
42 register long _arg1 __asm__ ("x0") = (long)(arg1); \
43 \
44 __asm__ volatile ( \
45 "svc #0\n" \
46 : "=r"(_arg1) \
47 : "r"(_arg1), \
48 "r"(_num) \
49 : "memory", "cc" \
50 ); \
51 _arg1; \
52})
53
54#define my_syscall2(num, arg1, arg2) \
55({ \
56 register long _num __asm__ ("x8") = (num); \
57 register long _arg1 __asm__ ("x0") = (long)(arg1); \
58 register long _arg2 __asm__ ("x1") = (long)(arg2); \
59 \
60 __asm__ volatile ( \
61 "svc #0\n" \
62 : "=r"(_arg1) \
63 : "r"(_arg1), "r"(_arg2), \
64 "r"(_num) \
65 : "memory", "cc" \
66 ); \
67 _arg1; \
68})
69
70#define my_syscall3(num, arg1, arg2, arg3) \
71({ \
72 register long _num __asm__ ("x8") = (num); \
73 register long _arg1 __asm__ ("x0") = (long)(arg1); \
74 register long _arg2 __asm__ ("x1") = (long)(arg2); \
75 register long _arg3 __asm__ ("x2") = (long)(arg3); \
76 \
77 __asm__ volatile ( \
78 "svc #0\n" \
79 : "=r"(_arg1) \
80 : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
81 "r"(_num) \
82 : "memory", "cc" \
83 ); \
84 _arg1; \
85})
86
87#define my_syscall4(num, arg1, arg2, arg3, arg4) \
88({ \
89 register long _num __asm__ ("x8") = (num); \
90 register long _arg1 __asm__ ("x0") = (long)(arg1); \
91 register long _arg2 __asm__ ("x1") = (long)(arg2); \
92 register long _arg3 __asm__ ("x2") = (long)(arg3); \
93 register long _arg4 __asm__ ("x3") = (long)(arg4); \
94 \
95 __asm__ volatile ( \
96 "svc #0\n" \
97 : "=r"(_arg1) \
98 : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
99 "r"(_num) \
100 : "memory", "cc" \
101 ); \
102 _arg1; \
103})
104
105#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
106({ \
107 register long _num __asm__ ("x8") = (num); \
108 register long _arg1 __asm__ ("x0") = (long)(arg1); \
109 register long _arg2 __asm__ ("x1") = (long)(arg2); \
110 register long _arg3 __asm__ ("x2") = (long)(arg3); \
111 register long _arg4 __asm__ ("x3") = (long)(arg4); \
112 register long _arg5 __asm__ ("x4") = (long)(arg5); \
113 \
114 __asm__ volatile ( \
115 "svc #0\n" \
116 : "=r" (_arg1) \
117 : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
118 "r"(_num) \
119 : "memory", "cc" \
120 ); \
121 _arg1; \
122})
123
124#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
125({ \
126 register long _num __asm__ ("x8") = (num); \
127 register long _arg1 __asm__ ("x0") = (long)(arg1); \
128 register long _arg2 __asm__ ("x1") = (long)(arg2); \
129 register long _arg3 __asm__ ("x2") = (long)(arg3); \
130 register long _arg4 __asm__ ("x3") = (long)(arg4); \
131 register long _arg5 __asm__ ("x4") = (long)(arg5); \
132 register long _arg6 __asm__ ("x5") = (long)(arg6); \
133 \
134 __asm__ volatile ( \
135 "svc #0\n" \
136 : "=r" (_arg1) \
137 : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
138 "r"(_arg6), "r"(_num) \
139 : "memory", "cc" \
140 ); \
141 _arg1; \
142})
143
144/* startup code */
145void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
146{
147 __asm__ volatile (
148 "mov x0, sp\n" /* save stack pointer to x0, as arg1 of _start_c */
149 "and sp, x0, -16\n" /* sp must be 16-byte aligned in the callee */
150 "bl _start_c\n" /* transfer to c runtime */
151 );
152 __builtin_unreachable();
153}
#endif /* _NOLIBC_ARCH_AARCH64_H */
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * AARCH64 specific definitions for NOLIBC
 * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
 */

#ifndef _NOLIBC_ARCH_AARCH64_H
#define _NOLIBC_ARCH_AARCH64_H

10/* O_* macros for fcntl/open are architecture-specific */
11#define O_RDONLY 0
12#define O_WRONLY 1
13#define O_RDWR 2
14#define O_CREAT 0x40
15#define O_EXCL 0x80
16#define O_NOCTTY 0x100
17#define O_TRUNC 0x200
18#define O_APPEND 0x400
19#define O_NONBLOCK 0x800
20#define O_DIRECTORY 0x4000
21
22/* The struct returned by the newfstatat() syscall. Differs slightly from the
23 * x86_64's stat one by field ordering, so be careful.
24 */
25struct sys_stat_struct {
26 unsigned long st_dev;
27 unsigned long st_ino;
28 unsigned int st_mode;
29 unsigned int st_nlink;
30 unsigned int st_uid;
31 unsigned int st_gid;
32
33 unsigned long st_rdev;
34 unsigned long __pad1;
35 long st_size;
36 int st_blksize;
37 int __pad2;
38
39 long st_blocks;
40 long st_atime;
41 unsigned long st_atime_nsec;
42 long st_mtime;
43
44 unsigned long st_mtime_nsec;
45 long st_ctime;
46 unsigned long st_ctime_nsec;
47 unsigned int __unused[2];
48};
49
50/* Syscalls for AARCH64 :
51 * - registers are 64-bit
52 * - stack is 16-byte aligned
53 * - syscall number is passed in x8
54 * - arguments are in x0, x1, x2, x3, x4, x5
55 * - the system call is performed by calling svc 0
56 * - syscall return comes in x0.
57 * - the arguments are cast to long and assigned into the target registers
58 * which are then simply passed as registers to the asm code, so that we
59 * don't have to experience issues with register constraints.
60 *
61 * On aarch64, select() is not implemented so we have to use pselect6().
62 */
63#define __ARCH_WANT_SYS_PSELECT6
64
65#define my_syscall0(num) \
66({ \
67 register long _num __asm__ ("x8") = (num); \
68 register long _arg1 __asm__ ("x0"); \
69 \
70 __asm__ volatile ( \
71 "svc #0\n" \
72 : "=r"(_arg1) \
73 : "r"(_num) \
74 : "memory", "cc" \
75 ); \
76 _arg1; \
77})
78
79#define my_syscall1(num, arg1) \
80({ \
81 register long _num __asm__ ("x8") = (num); \
82 register long _arg1 __asm__ ("x0") = (long)(arg1); \
83 \
84 __asm__ volatile ( \
85 "svc #0\n" \
86 : "=r"(_arg1) \
87 : "r"(_arg1), \
88 "r"(_num) \
89 : "memory", "cc" \
90 ); \
91 _arg1; \
92})
93
94#define my_syscall2(num, arg1, arg2) \
95({ \
96 register long _num __asm__ ("x8") = (num); \
97 register long _arg1 __asm__ ("x0") = (long)(arg1); \
98 register long _arg2 __asm__ ("x1") = (long)(arg2); \
99 \
100 __asm__ volatile ( \
101 "svc #0\n" \
102 : "=r"(_arg1) \
103 : "r"(_arg1), "r"(_arg2), \
104 "r"(_num) \
105 : "memory", "cc" \
106 ); \
107 _arg1; \
108})
109
110#define my_syscall3(num, arg1, arg2, arg3) \
111({ \
112 register long _num __asm__ ("x8") = (num); \
113 register long _arg1 __asm__ ("x0") = (long)(arg1); \
114 register long _arg2 __asm__ ("x1") = (long)(arg2); \
115 register long _arg3 __asm__ ("x2") = (long)(arg3); \
116 \
117 __asm__ volatile ( \
118 "svc #0\n" \
119 : "=r"(_arg1) \
120 : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
121 "r"(_num) \
122 : "memory", "cc" \
123 ); \
124 _arg1; \
125})
126
127#define my_syscall4(num, arg1, arg2, arg3, arg4) \
128({ \
129 register long _num __asm__ ("x8") = (num); \
130 register long _arg1 __asm__ ("x0") = (long)(arg1); \
131 register long _arg2 __asm__ ("x1") = (long)(arg2); \
132 register long _arg3 __asm__ ("x2") = (long)(arg3); \
133 register long _arg4 __asm__ ("x3") = (long)(arg4); \
134 \
135 __asm__ volatile ( \
136 "svc #0\n" \
137 : "=r"(_arg1) \
138 : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
139 "r"(_num) \
140 : "memory", "cc" \
141 ); \
142 _arg1; \
143})
144
145#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
146({ \
147 register long _num __asm__ ("x8") = (num); \
148 register long _arg1 __asm__ ("x0") = (long)(arg1); \
149 register long _arg2 __asm__ ("x1") = (long)(arg2); \
150 register long _arg3 __asm__ ("x2") = (long)(arg3); \
151 register long _arg4 __asm__ ("x3") = (long)(arg4); \
152 register long _arg5 __asm__ ("x4") = (long)(arg5); \
153 \
154 __asm__ volatile ( \
155 "svc #0\n" \
156 : "=r" (_arg1) \
157 : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
158 "r"(_num) \
159 : "memory", "cc" \
160 ); \
161 _arg1; \
162})
163
164#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
165({ \
166 register long _num __asm__ ("x8") = (num); \
167 register long _arg1 __asm__ ("x0") = (long)(arg1); \
168 register long _arg2 __asm__ ("x1") = (long)(arg2); \
169 register long _arg3 __asm__ ("x2") = (long)(arg3); \
170 register long _arg4 __asm__ ("x3") = (long)(arg4); \
171 register long _arg5 __asm__ ("x4") = (long)(arg5); \
172 register long _arg6 __asm__ ("x5") = (long)(arg6); \
173 \
174 __asm__ volatile ( \
175 "svc #0\n" \
176 : "=r" (_arg1) \
177 : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
178 "r"(_arg6), "r"(_num) \
179 : "memory", "cc" \
180 ); \
181 _arg1; \
182})
183
184/* startup code */
185__asm__ (".section .text\n"
186 ".weak _start\n"
187 "_start:\n"
188 "ldr x0, [sp]\n" // argc (x0) was in the stack
189 "add x1, sp, 8\n" // argv (x1) = sp
190 "lsl x2, x0, 3\n" // envp (x2) = 8*argc ...
191 "add x2, x2, 8\n" // + 8 (skip null)
192 "add x2, x2, x1\n" // + argv
193 "and sp, x1, -16\n" // sp must be 16-byte aligned in the callee
194 "bl main\n" // main() returns the status code, we'll exit with it.
195 "mov x8, 93\n" // NR_exit == 93
196 "svc #0\n"
197 "");

#endif // _NOLIBC_ARCH_AARCH64_H